bitkeeper revision 1.1236.12.1 (422d6eadP8rlqEFriOCwM-dW2OyQTA)
author kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 8 Mar 2005 09:21:49 +0000 (09:21 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 8 Mar 2005 09:21:49 +0000 (09:21 +0000)
Fix ring definitions to not use typeof. The accessor macros no longer
take a 'params' argument.
Signed-off-by: Keir Fraser <keir.fraser@cl.cam.ac.uk>
16 files changed:
linux-2.6.10-xen-sparse/arch/xen/kernel/ctrl_if.c
linux-2.6.10-xen-sparse/drivers/xen/blkback/blkback.c
linux-2.6.10-xen-sparse/drivers/xen/blkback/interface.c
linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c
linux-2.6.10-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c
linux-2.6.10-xen-sparse/drivers/xen/blktap/blktap_datapath.c
linux-2.6.10-xen-sparse/drivers/xen/blktap/blktap_userdev.c
linux-2.6.10-xen-sparse/drivers/xen/usbback/interface.c
linux-2.6.10-xen-sparse/drivers/xen/usbback/usbback.c
linux-2.6.10-xen-sparse/drivers/xen/usbfront/usbfront.c
tools/blktap/blktaplib.c
tools/xcs/ctrl_interface.c
xen/include/public/io/blkif.h
xen/include/public/io/domain_controller.h
xen/include/public/io/ring.h
xen/include/public/io/usbif.h

index 3d305718a63ca7040173a15af6562cc83599131d..5b38ba59cb1e1418c939d583afa807baa3508c2a 100644 (file)
  * Extra ring macros to sync a consumer index up to the public producer index. 
  * Generally UNSAFE, but we use it for recovery and shutdown in some cases.
  */
-#define RING_DROP_PENDING_REQUESTS(_p, _r)                              \
+#define RING_DROP_PENDING_REQUESTS(_r)                                  \
     do {                                                                \
         (_r)->req_cons = (_r)->sring->req_prod;                         \
     } while (0)
-#define RING_DROP_PENDING_RESPONSES(_p, _r)                             \
+#define RING_DROP_PENDING_RESPONSES(_r)                                 \
     do {                                                                \
         (_r)->rsp_cons = (_r)->sring->rsp_prod;                         \
     } while (0)
@@ -125,7 +125,7 @@ static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
 static void __ctrl_if_tx_tasklet(unsigned long data)
 {
     ctrl_msg_t *msg;
-    int         was_full = RING_FULL(CTRL_RING, &ctrl_if_tx_ring);
+    int         was_full = RING_FULL(&ctrl_if_tx_ring);
     RING_IDX    i, rp;
 
     i  = ctrl_if_tx_ring.rsp_cons;
@@ -134,7 +134,7 @@ static void __ctrl_if_tx_tasklet(unsigned long data)
 
     for ( ; i != rp; i++ )
     {
-        msg = RING_GET_RESPONSE(CTRL_RING, &ctrl_if_tx_ring, i);
+        msg = RING_GET_RESPONSE(&ctrl_if_tx_ring, i);
         
         DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", i-1,
                 ctrl_if_tx_ring.sring->rsp_prod,
@@ -157,7 +157,7 @@ static void __ctrl_if_tx_tasklet(unsigned long data)
     smp_mb();
     ctrl_if_tx_ring.rsp_cons = i;
             
-    if ( was_full && !RING_FULL(CTRL_RING, &ctrl_if_tx_ring) )
+    if ( was_full && !RING_FULL(&ctrl_if_tx_ring) )
     {
         wake_up(&ctrl_if_tx_wait);
         run_task_queue(&ctrl_if_tx_tq);
@@ -193,7 +193,7 @@ static void __ctrl_if_rx_tasklet(unsigned long data)
  
     for ( ; i != rp; i++) 
     {
-        pmsg = RING_GET_REQUEST(CTRL_RING, &ctrl_if_rx_ring, i);
+        pmsg = RING_GET_REQUEST(&ctrl_if_rx_ring, i);
         memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
 
         DPRINTK("Rx-Req %u/%u :: %d/%d\n", i-1,
@@ -227,10 +227,10 @@ static void __ctrl_if_rx_tasklet(unsigned long data)
 static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
                                      struct pt_regs *regs)
 {
-    if ( RING_HAS_UNCONSUMED_RESPONSES(CTRL_RING, &ctrl_if_tx_ring) )
+    if ( RING_HAS_UNCONSUMED_RESPONSES(&ctrl_if_tx_ring) )
         tasklet_schedule(&ctrl_if_tx_tasklet);
 
-    if ( RING_HAS_UNCONSUMED_REQUESTS(CTRL_RING, &ctrl_if_rx_ring) )
+    if ( RING_HAS_UNCONSUMED_REQUESTS(&ctrl_if_rx_ring) )
         tasklet_schedule(&ctrl_if_rx_tasklet);
 
     return IRQ_HANDLED;
@@ -248,7 +248,7 @@ ctrl_if_send_message_noblock(
 
     spin_lock_irqsave(&ctrl_if_lock, flags);
 
-    if ( RING_FULL(CTRL_RING, &ctrl_if_tx_ring) )
+    if ( RING_FULL(&ctrl_if_tx_ring) )
     {
         spin_unlock_irqrestore(&ctrl_if_lock, flags);
         return -EAGAIN;
@@ -269,11 +269,11 @@ ctrl_if_send_message_noblock(
             ctrl_if_tx_ring.rsp_cons,
             msg->type, msg->subtype);
 
-    dmsg = RING_GET_REQUEST(CTRL_RING, &ctrl_if_tx_ring, 
+    dmsg = RING_GET_REQUEST(&ctrl_if_tx_ring, 
             ctrl_if_tx_ring.req_prod_pvt);
     memcpy(dmsg, msg, sizeof(*msg));
     ctrl_if_tx_ring.req_prod_pvt++;
-    RING_PUSH_REQUESTS(CTRL_RING, &ctrl_if_tx_ring);
+    RING_PUSH_REQUESTS(&ctrl_if_tx_ring);
 
     spin_unlock_irqrestore(&ctrl_if_lock, flags);
 
@@ -373,7 +373,7 @@ ctrl_if_enqueue_space_callback(
     struct tq_struct *task)
 {
     /* Fast path. */
-    if ( !RING_FULL(CTRL_RING, &ctrl_if_tx_ring) )
+    if ( !RING_FULL(&ctrl_if_tx_ring) )
         return 0;
 
     (void)queue_task(task, &ctrl_if_tx_tq);
@@ -384,7 +384,7 @@ ctrl_if_enqueue_space_callback(
      * certainly return 'not full'.
      */
     smp_mb();
-    return RING_FULL(CTRL_RING, &ctrl_if_tx_ring);
+    return RING_FULL(&ctrl_if_tx_ring);
 }
 
 void
@@ -404,13 +404,13 @@ ctrl_if_send_response(
             ctrl_if_rx_ring.rsp_prod_pvt, 
             msg->type, msg->subtype);
 
-    dmsg = RING_GET_RESPONSE(CTRL_RING, &ctrl_if_rx_ring, 
+    dmsg = RING_GET_RESPONSE(&ctrl_if_rx_ring, 
             ctrl_if_rx_ring.rsp_prod_pvt);
     if ( dmsg != msg )
         memcpy(dmsg, msg, sizeof(*msg));
 
     ctrl_if_rx_ring.rsp_prod_pvt++;
-    RING_PUSH_RESPONSES(CTRL_RING, &ctrl_if_rx_ring);
+    RING_PUSH_RESPONSES(&ctrl_if_rx_ring);
 
     spin_unlock_irqrestore(&ctrl_if_lock, flags);
 
@@ -501,8 +501,8 @@ void ctrl_if_resume(void)
     }
 
     /* Sync up with shared indexes. */
-    RING_DROP_PENDING_RESPONSES(CTRL_RING, &ctrl_if_tx_ring);
-    RING_DROP_PENDING_REQUESTS(CTRL_RING, &ctrl_if_rx_ring);
+    RING_DROP_PENDING_RESPONSES(&ctrl_if_tx_ring);
+    RING_DROP_PENDING_REQUESTS(&ctrl_if_rx_ring);
 
     ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
     ctrl_if_irq    = bind_evtchn_to_irq(ctrl_if_evtchn);
@@ -521,8 +521,8 @@ void __init ctrl_if_init(void)
     for ( i = 0; i < 256; i++ )
         ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
 
-    FRONT_RING_ATTACH(CTRL_RING, &ctrl_if_tx_ring, &ctrl_if->tx_ring);
-    BACK_RING_ATTACH(CTRL_RING, &ctrl_if_rx_ring, &ctrl_if->rx_ring);
+    FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
+    BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
     
     spin_lock_init(&ctrl_if_lock);
 
@@ -552,7 +552,7 @@ int ctrl_if_transmitter_empty(void)
 
 void ctrl_if_discard_responses(void)
 {
-    RING_DROP_PENDING_RESPONSES(CTRL_RING, &ctrl_if_tx_ring);
+    RING_DROP_PENDING_RESPONSES(&ctrl_if_tx_ring);
 }
 
 EXPORT_SYMBOL(ctrl_if_send_message_noblock);
index 4f74a1c5142d95979b1fe095647cf75b419a0740..ab9fc3c977192116004bdf5664d0099063ad9304 100644 (file)
@@ -287,7 +287,7 @@ static int do_block_io_op(blkif_t *blkif, int max_to_do)
     rmb(); /* Ensure we see queued requests up to 'rp'. */
 
     for ( i = blk_ring->req_cons; 
-         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(BLKIF_RING, blk_ring, i);
+         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
           i++ )
     {
         if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
@@ -296,7 +296,7 @@ static int do_block_io_op(blkif_t *blkif, int max_to_do)
             break;
         }
         
-        req = RING_GET_REQUEST(BLKIF_RING, blk_ring, i);
+        req = RING_GET_REQUEST(blk_ring, i);
         switch ( req->operation )
         {
         case BLKIF_OP_READ:
@@ -561,13 +561,13 @@ static void make_response(blkif_t *blkif, unsigned long id,
 
     /* Place on the response ring for the relevant domain. */ 
     spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-    resp = RING_GET_RESPONSE(BLKIF_RING, blk_ring, blk_ring->rsp_prod_pvt);
+    resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
     resp->id        = id;
     resp->operation = op;
     resp->status    = st;
     wmb(); /* Ensure other side can see the response fields. */
     blk_ring->rsp_prod_pvt++;
-    RING_PUSH_RESPONSES(BLKIF_RING, blk_ring);
+    RING_PUSH_RESPONSES(blk_ring);
     spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 
     /* Kick the relevant domain. */
index 087e02d6202f8bfe43e2ceacf3d558d090ad1ad7..08c7a13267e5425f76b5d65c8ebaf8c2d443fe97 100644 (file)
@@ -197,8 +197,8 @@ void blkif_connect(blkif_be_connect_t *connect)
         return;
     }
     sring = (blkif_sring_t *)vma->addr;
-    SHARED_RING_INIT(BLKIF_RING, sring);
-    BACK_RING_INIT(BLKIF_RING, &blkif->blk_ring, sring);
+    SHARED_RING_INIT(sring);
+    BACK_RING_INIT(&blkif->blk_ring, sring);
     
     blkif->evtchn        = evtchn;
     blkif->irq           = bind_evtchn_to_irq(evtchn);
index 65c7011e76dce839942e963731086af041fb869a..12d4449c9332acadc0d2357e559a7ef069a2ab79 100644 (file)
@@ -75,7 +75,7 @@ static blkif_response_t blkif_control_rsp;
 static blkif_front_ring_t blk_ring;
 
 unsigned long rec_ring_free;
-blkif_request_t rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)];
+blkif_request_t rec_ring[RING_SIZE(&blk_ring)];
 
 static int recovery = 0;           /* "Recovery in progress" flag.  Protected
                                     * by the blkif_io_lock */
@@ -90,7 +90,7 @@ static inline int GET_ID_FROM_FREELIST( void )
 {
     unsigned long free = rec_ring_free;
 
-    if ( free > RING_SIZE(BLKIF_RING, &blk_ring) )
+    if ( free > RING_SIZE(&blk_ring) )
         BUG();
 
     rec_ring_free = rec_ring[free].id;
@@ -151,7 +151,7 @@ static inline void translate_req_to_mfn(blkif_request_t *xreq,
 static inline void flush_requests(void)
 {
     DISABLE_SCATTERGATHER();
-    RING_PUSH_REQUESTS(BLKIF_RING, &blk_ring);
+    RING_PUSH_REQUESTS(&blk_ring);
     notify_via_evtchn(blkif_evtchn);
 }
 
@@ -331,7 +331,7 @@ static int blkif_queue_request(struct request *req)
         return 1;
 
     /* Fill out a communications ring structure. */
-    ring_req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
+    ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
     id = GET_ID_FROM_FREELIST();
     rec_ring[id].id = (unsigned long) req;
 
@@ -384,7 +384,7 @@ void do_blkif_request(request_queue_t *rq)
             continue;
         }
 
-        if ( RING_FULL(BLKIF_RING, &blk_ring) )
+        if ( RING_FULL(&blk_ring) )
         {
             blk_stop_queue(rq);
             break;
@@ -429,7 +429,7 @@ static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
     {
         unsigned long id;
 
-        bret = RING_GET_RESPONSE(BLKIF_RING, &blk_ring, i);
+        bret = RING_GET_RESPONSE(&blk_ring, i);
         id = bret->id;
         req = (struct request *)rec_ring[id].id;
         blkif_completion( &rec_ring[id] );
@@ -515,11 +515,11 @@ static void kick_pending_request_queues(void)
 {
     /* We kick pending request queues if the ring is reasonably empty. */
     if ( (nr_pending != 0) && 
-         (RING_PENDING_REQUESTS(BLKIF_RING, &blk_ring) < 
-          (RING_SIZE(BLKIF_RING, &blk_ring) >> 1)) )
+         (RING_PENDING_REQUESTS(&blk_ring) < 
+          (RING_SIZE(&blk_ring) >> 1)) )
     {
         /* Attempt to drain the queue, but bail if the ring becomes full. */
-        while ( (nr_pending != 0) && !RING_FULL(BLKIF_RING, &blk_ring) )
+        while ( (nr_pending != 0) && !RING_FULL(&blk_ring) )
             do_blkif_request(pending_queues[--nr_pending]);
     }
 }
@@ -813,7 +813,7 @@ static int blkif_queue_request(unsigned long   id,
              (sg_dev == device) &&
              (sg_next_sect == sector_number) )
         {
-            req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, 
+            req = RING_GET_REQUEST(&blk_ring, 
                                    blk_ring.req_prod_pvt - 1);
             bh = (struct buffer_head *)id;
      
@@ -834,7 +834,7 @@ static int blkif_queue_request(unsigned long   id,
 
             return 0;
         }
-        else if ( RING_FULL(BLKIF_RING, &blk_ring) )
+        else if ( RING_FULL(&blk_ring) )
         {
             return 1;
         }
@@ -851,7 +851,7 @@ static int blkif_queue_request(unsigned long   id,
     }
 
     /* Fill out a communications ring structure. */
-    req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
+    req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
 
     xid = GET_ID_FROM_FREELIST();
     rec_ring[xid].id = id;
@@ -976,7 +976,7 @@ static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
         unsigned long id;
         blkif_response_t *bret;
         
-        bret = RING_GET_RESPONSE(BLKIF_RING, &blk_ring, i);
+        bret = RING_GET_RESPONSE(&blk_ring, i);
         id = bret->id;
         bh = (struct buffer_head *)rec_ring[id].id; 
 
@@ -1026,21 +1026,21 @@ void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
     blkif_request_t *req_d;
 
  retry:
-    while ( RING_FULL(BLKIF_RING, &blk_ring) )
+    while ( RING_FULL(&blk_ring) )
     {
         set_current_state(TASK_INTERRUPTIBLE);
         schedule_timeout(1);
     }
 
     spin_lock_irqsave(&blkif_io_lock, flags);
-    if ( RING_FULL(BLKIF_RING, &blk_ring) )
+    if ( RING_FULL(&blk_ring) )
     {
         spin_unlock_irqrestore(&blkif_io_lock, flags);
         goto retry;
     }
 
     DISABLE_SCATTERGATHER();
-    req_d = RING_GET_REQUEST(BLKIF_RING, &blk_ring, blk_ring.req_prod_pvt);
+    req_d = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
     *req_d = *req;    
 
     id = GET_ID_FROM_FREELIST();
@@ -1130,8 +1130,8 @@ static void blkif_disconnect(void)
         free_page((unsigned long)blk_ring.sring);
     
     sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
-    SHARED_RING_INIT(BLKIF_RING, sring);
-    FRONT_RING_INIT(BLKIF_RING, &blk_ring, sring);
+    SHARED_RING_INIT(sring);
+    FRONT_RING_INIT(&blk_ring, sring);
     blkif_state  = BLKIF_STATE_DISCONNECTED;
     blkif_send_interface_connect();
 }
@@ -1151,11 +1151,11 @@ static void blkif_recover(void)
      * This will need to be fixed once we have barriers */
 
     /* Stage 1 : Find active and move to safety. */
-    for ( i = 0; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
+    for ( i = 0; i < RING_SIZE(&blk_ring); i++ )
     {
         if ( rec_ring[i].id >= PAGE_OFFSET )
         {
-            req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, 
+            req = RING_GET_REQUEST(&blk_ring, 
                                    blk_ring.req_prod_pvt);
             translate_req_to_mfn(req, &rec_ring[i]);
             blk_ring.req_prod_pvt++;
@@ -1165,17 +1165,17 @@ static void blkif_recover(void)
     /* Stage 2 : Set up shadow list. */
     for ( i = 0; i < blk_ring.req_prod_pvt; i++ ) 
     {
-        req = RING_GET_REQUEST(BLKIF_RING, &blk_ring, i);
+        req = RING_GET_REQUEST(&blk_ring, i);
         rec_ring[i].id = req->id;  
         req->id = i;
         translate_req_to_pfn(&rec_ring[i], req);
     }
 
     /* Stage 3 : Set up free list. */
-    for ( ; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
+    for ( ; i < RING_SIZE(&blk_ring); i++ )
         rec_ring[i].id = i+1;
     rec_ring_free = blk_ring.req_prod_pvt;
-    rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)-1].id = 0x0fffffff;
+    rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
 
     /* blk_ring->req_prod will be set when we flush_requests().*/
     wmb();
@@ -1368,9 +1368,9 @@ int __init xlblk_init(void)
     printk(KERN_INFO "xen_blk: Initialising virtual block device driver\n");
 
     rec_ring_free = 0;
-    for ( i = 0; i < RING_SIZE(BLKIF_RING, &blk_ring); i++ )
+    for ( i = 0; i < RING_SIZE(&blk_ring); i++ )
         rec_ring[i].id = i+1;
-    rec_ring[RING_SIZE(BLKIF_RING, &blk_ring)-1].id = 0x0fffffff;
+    rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
 
     (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
                                     CALLBACK_IN_BLOCKING_CONTEXT);
index 98a76f15af4e5bf88ae6e06c2e75fb4a332983ce..9ce74c7442e75292099d4c50ccfe58f537fb4b27 100644 (file)
@@ -228,8 +228,8 @@ void blkif_ptfe_connect(blkif_be_connect_t *connect)
     }
 
     sring = (blkif_sring_t *)vma->addr;
-    SHARED_RING_INIT(BLKIF_RING, sring);
-    BACK_RING_INIT(BLKIF_RING, &blkif->blk_ring, sring);
+    SHARED_RING_INIT(sring);
+    BACK_RING_INIT(&blkif->blk_ring, sring);
     
     blkif->evtchn        = evtchn;
     blkif->irq           = bind_evtchn_to_irq(evtchn);
@@ -301,8 +301,8 @@ static void blkif_ptbe_disconnect(void)
     blkif_sring_t *sring;
     
     sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
-    SHARED_RING_INIT(BLKIF_RING, sring);
-    FRONT_RING_INIT(BLKIF_RING, &blktap_be_ring, sring);
+    SHARED_RING_INIT(sring);
+    FRONT_RING_INIT(&blktap_be_ring, sring);
     blktap_be_state  = BLKIF_STATE_DISCONNECTED;
     DPRINTK("Blkif-Passthrough-BE is now DISCONNECTED.\n");
     blkif_ptbe_send_interface_connect();
index 62e98b660183583b0252d8f46b774b21535cdb7f..dc2c0a739090f12613ba4060feb435bd33c652cb 100644 (file)
@@ -97,7 +97,7 @@ inline int write_resp_to_fe_ring(blkif_t *blkif, blkif_response_t *rsp)
     ar = &active_reqs[ID_TO_IDX(rsp->id)];
     rsp->id = ar->id;
             
-    resp_d = RING_GET_RESPONSE(BLKIF_RING, &blkif->blk_ring,
+    resp_d = RING_GET_RESPONSE(&blkif->blk_ring,
             blkif->blk_ring.rsp_prod_pvt);
     memcpy(resp_d, rsp, sizeof(blkif_response_t));
     wmb();
@@ -118,7 +118,7 @@ inline int write_req_to_be_ring(blkif_request_t *req)
         return 0;
     }
     
-    req_d = RING_GET_REQUEST(BLKIF_RING, &blktap_be_ring,
+    req_d = RING_GET_REQUEST(&blktap_be_ring,
             blktap_be_ring.req_prod_pvt);
     memcpy(req_d, req, sizeof(blkif_request_t));
     wmb();
@@ -129,7 +129,7 @@ inline int write_req_to_be_ring(blkif_request_t *req)
 
 inline void kick_fe_domain(blkif_t *blkif) 
 {
-    RING_PUSH_RESPONSES(BLKIF_RING, &blkif->blk_ring);
+    RING_PUSH_RESPONSES(&blkif->blk_ring);
     notify_via_evtchn(blkif->evtchn);
     DPRINTK("notified FE(dom %u)\n", blkif->domid);
     
@@ -141,7 +141,7 @@ inline void kick_be_domain(void)
         return;
     
     wmb(); /* Ensure that the frontend can see the requests. */
-    RING_PUSH_REQUESTS(BLKIF_RING, &blktap_be_ring);
+    RING_PUSH_REQUESTS(&blktap_be_ring);
     notify_via_evtchn(blktap_be_evtchn);
     DPRINTK("notified BE\n");
 }
@@ -299,7 +299,7 @@ static int do_block_io_op(blkif_t *blkif, int max_to_do)
     
     for ( i = blkif->blk_ring.req_cons; 
          (i != rp) && 
-            !RING_REQUEST_CONS_OVERFLOW(BLKIF_RING, &blkif->blk_ring, i);
+            !RING_REQUEST_CONS_OVERFLOW(&blkif->blk_ring, i);
           i++ )
     {
         
@@ -309,7 +309,7 @@ static int do_block_io_op(blkif_t *blkif, int max_to_do)
             break;
         }
         
-        req_s = RING_GET_REQUEST(BLKIF_RING, &blkif->blk_ring, i);
+        req_s = RING_GET_REQUEST(&blkif->blk_ring, i);
         /* This is a new request:  
          * Assign an active request record, and remap the id. 
          */
@@ -354,10 +354,9 @@ static int do_block_io_op(blkif_t *blkif, int max_to_do)
             /* copy the request message to the BERing */
 
             DPRINTK("blktap: FERing[%u] -> BERing[%u]\n", 
-                    (unsigned)__SHARED_RING_MASK(BLKIF_RING, 
-                        blktap_be_ring.sring, i), 
-                    (unsigned)__SHARED_RING_MASK(BLKIF_RING, 
-                        blktap_be_ring.sring, blktap_be_ring.req_prod_pvt));
+                    (unsigned)i & (RING_SIZE(&blktap_be_ring)-1),
+                    (unsigned)blktap_be_ring.req_prod_pvt & 
+                    (RING_SIZE(&blktap_be_ring)-1));
             
             write_req_to_be_ring(req_s);
             notify_be = 1;
@@ -398,7 +397,7 @@ irqreturn_t blkif_ptbe_int(int irq, void *dev_id,
       
     for ( i = blktap_be_ring.rsp_cons; i != rp; i++)
     {
-        resp_s = RING_GET_RESPONSE(BLKIF_RING, &blktap_be_ring, i);
+        resp_s = RING_GET_RESPONSE(&blktap_be_ring, i);
         
         /* BE -> FE interposition point is here. */
     
@@ -426,11 +425,9 @@ irqreturn_t blkif_ptbe_int(int irq, void *dev_id,
             /* Copy the response message to FERing */
          
             DPRINTK("blktap: BERing[%u] -> FERing[%u]\n", 
-                    (unsigned)__SHARED_RING_MASK(BLKIF_RING, 
-                        blkif->blk_ring.sring, i), 
-                    (unsigned)__SHARED_RING_MASK(BLKIF_RING, 
-                        blkif->blk_ring.sring, 
-                        blkif->blk_ring.rsp_prod_pvt));
+                    (unsigned)i & (RING_SIZE(&blkif->blk_ring)-1),
+                    (unsigned)blkif->blk_ring.rsp_prod_pvt & 
+                    (RING_SIZE(&blkif->blk_ring)-1));
 
             write_resp_to_fe_ring(blkif, resp_s);
             kick_fe_domain(blkif);
index 1876287a124b76a7be539bfa7c5c18fd185b6afb..9e94f800bf6d3ed5164ec1324c8883afb839589a 100644 (file)
@@ -86,8 +86,8 @@ static int blktap_open(struct inode *inode, struct file *filp)
 
     SetPageReserved(virt_to_page(csring));
     
-    SHARED_RING_INIT(CTRL_RING, csring);
-    FRONT_RING_INIT(CTRL_RING, &blktap_uctrl_ring, csring);
+    SHARED_RING_INIT(csring);
+    FRONT_RING_INIT(&blktap_uctrl_ring, csring);
 
 
     /* Allocate the fe ring. */
@@ -97,8 +97,8 @@ static int blktap_open(struct inode *inode, struct file *filp)
 
     SetPageReserved(virt_to_page(sring));
     
-    SHARED_RING_INIT(BLKIF_RING, sring);
-    FRONT_RING_INIT(BLKIF_RING, &blktap_ufe_ring, sring);
+    SHARED_RING_INIT(sring);
+    FRONT_RING_INIT(&blktap_ufe_ring, sring);
 
     /* Allocate the be ring. */
     sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
@@ -107,8 +107,8 @@ static int blktap_open(struct inode *inode, struct file *filp)
 
     SetPageReserved(virt_to_page(sring));
     
-    SHARED_RING_INIT(BLKIF_RING, sring);
-    BACK_RING_INIT(BLKIF_RING, &blktap_ube_ring, sring);
+    SHARED_RING_INIT(sring);
+    BACK_RING_INIT(&blktap_ube_ring, sring);
 
     DPRINTK(KERN_ALERT "blktap open.\n");
 
@@ -252,13 +252,13 @@ static unsigned int blktap_poll(struct file *file, poll_table *wait)
 {
         poll_wait(file, &blktap_wait, wait);
 
-        if ( RING_HAS_UNPUSHED_REQUESTS(BLKIF_RING, &blktap_uctrl_ring) ||
-             RING_HAS_UNPUSHED_REQUESTS(BLKIF_RING, &blktap_ufe_ring)   ||
-             RING_HAS_UNPUSHED_RESPONSES(BLKIF_RING, &blktap_ube_ring) ) {
+        if ( RING_HAS_UNPUSHED_REQUESTS(&blktap_uctrl_ring) ||
+             RING_HAS_UNPUSHED_REQUESTS(&blktap_ufe_ring)   ||
+             RING_HAS_UNPUSHED_RESPONSES(&blktap_ube_ring) ) {
 
-            RING_PUSH_REQUESTS(BLKIF_RING, &blktap_uctrl_ring);
-            RING_PUSH_REQUESTS(BLKIF_RING, &blktap_ufe_ring);
-            RING_PUSH_RESPONSES(BLKIF_RING, &blktap_ube_ring);
+            RING_PUSH_REQUESTS(&blktap_uctrl_ring);
+            RING_PUSH_REQUESTS(&blktap_ufe_ring);
+            RING_PUSH_RESPONSES(&blktap_ube_ring);
             return POLLIN | POLLRDNORM;
         }
 
@@ -298,12 +298,12 @@ int blktap_write_fe_ring(blkif_request_t *req)
         return 0;
     }
 
-    if ( RING_FULL(BLKIF_RING, &blktap_ufe_ring) ) {
+    if ( RING_FULL(&blktap_ufe_ring) ) {
         DPRINTK("blktap: fe_ring is full, can't add.\n");
         return 0;
     }
 
-    target = RING_GET_REQUEST(BLKIF_RING, &blktap_ufe_ring,
+    target = RING_GET_REQUEST(&blktap_ufe_ring,
             blktap_ufe_ring.req_prod_pvt);
     memcpy(target, req, sizeof(*req));
 
@@ -344,7 +344,7 @@ int blktap_write_be_ring(blkif_response_t *rsp)
 
     /* No test for fullness in the response direction. */
 
-    target = RING_GET_RESPONSE(BLKIF_RING, &blktap_ube_ring,
+    target = RING_GET_RESPONSE(&blktap_ube_ring,
             blktap_ube_ring.rsp_prod_pvt);
     memcpy(target, rsp, sizeof(*rsp));
 
@@ -375,7 +375,7 @@ static int blktap_read_fe_ring(void)
         
         for ( i = blktap_ufe_ring.rsp_cons; i != rp; i++ )
         {
-            resp_s = RING_GET_RESPONSE(BLKIF_RING, &blktap_ufe_ring, i);
+            resp_s = RING_GET_RESPONSE(&blktap_ufe_ring, i);
             
             DPRINTK("resp->fe_ring\n");
             ar = lookup_active_req(ID_TO_IDX(resp_s->id));
@@ -406,7 +406,7 @@ static int blktap_read_be_ring(void)
         rmb();
         for ( i = blktap_ube_ring.req_cons; i != rp; i++ )
         {
-            req_s = RING_GET_REQUEST(BLKIF_RING, &blktap_ube_ring, i);
+            req_s = RING_GET_REQUEST(&blktap_ube_ring, i);
 
             DPRINTK("req->be_ring\n");
             write_req_to_be_ring(req_s);
@@ -430,7 +430,7 @@ int blktap_write_ctrl_ring(ctrl_msg_t *msg)
 
     /* No test for fullness in the response direction. */
 
-    target = RING_GET_REQUEST(CTRL_RING, &blktap_uctrl_ring,
+    target = RING_GET_REQUEST(&blktap_uctrl_ring,
             blktap_uctrl_ring.req_prod_pvt);
     memcpy(target, msg, sizeof(*msg));
 
index ad240aa1298adb48d78fb3be2537b7d177a58a7d..4630da8951add4d6e2c6fd46278fc1a32e8d6076 100644 (file)
@@ -194,8 +194,8 @@ void usbif_connect(usbif_be_connect_t *connect)
     }
 
     sring = (usbif_sring_t *)vma->addr;
-    SHARED_RING_INIT(USBIF_RING, sring);
-    BACK_RING_INIT(USBIF_RING, &up->usb_ring, sring);
+    SHARED_RING_INIT(sring);
+    BACK_RING_INIT(&up->usb_ring, sring);
 
     up->evtchn        = evtchn;
     up->irq           = bind_evtchn_to_irq(evtchn);
index a969a8cd156a3f4c9c52a861e26ba11e26c865ac..fc98ac18a756c797569235cfd55dffcf4d98bfe4 100644 (file)
@@ -398,7 +398,7 @@ static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
     
     /* Take items off the comms ring, taking care not to overflow. */
     for ( i = usb_ring->req_cons; 
-          (i != rp) && !RING_REQUEST_CONS_OVERFLOW(USBIF_RING, usb_ring, i);
+          (i != rp) && !RING_REQUEST_CONS_OVERFLOW(usb_ring, i);
           i++ )
     {
         if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
@@ -407,7 +407,7 @@ static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
             break;
         }
 
-        req = RING_GET_REQUEST(USBIF_RING, usb_ring, i);
+        req = RING_GET_REQUEST(usb_ring, i);
         
         switch ( req->operation )
         {
@@ -808,7 +808,7 @@ static void make_response(usbif_priv_t *up, unsigned long id,
 
     /* Place on the response ring for the relevant domain. */ 
     spin_lock_irqsave(&up->usb_ring_lock, flags);
-    resp = RING_GET_RESPONSE(USBIF_RING, usb_ring, usb_ring->rsp_prod_pvt);
+    resp = RING_GET_RESPONSE(usb_ring, usb_ring->rsp_prod_pvt);
     resp->id        = id;
     resp->operation = op;
     resp->status    = st;
@@ -819,7 +819,7 @@ static void make_response(usbif_priv_t *up, unsigned long id,
     dump_response(resp);
 
     usb_ring->rsp_prod_pvt++;
-    RING_PUSH_RESPONSES(USBIF_RING, usb_ring);
+    RING_PUSH_RESPONSES(usb_ring);
     spin_unlock_irqrestore(&up->usb_ring_lock, flags);
 
     /* Kick the relevant domain. */
index 264c3e6b073337d97a939520d77a8a5ad38f279a..5fe65a7cbab66ad188df6edac8dcbd6090e0b273 100644 (file)
@@ -214,7 +214,7 @@ static int xhci_queue_req(struct urb *urb)
 #endif
         
 
-        if ( RING_FULL(USBIF_RING, usb_ring) )
+        if ( RING_FULL(usb_ring) )
         {
                 printk(KERN_WARNING
                        "xhci_queue_req(): USB ring full, not queuing request\n");
@@ -222,7 +222,7 @@ static int xhci_queue_req(struct urb *urb)
         }
 
         /* Stick something in the shared communications ring. */
-       req = RING_GET_REQUEST(USBIF_RING, usb_ring, usb_ring->req_prod_pvt);
+       req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
 
         req->operation       = USBIF_OP_IO;
         req->port            = 0; /* We don't care what the port is. */
@@ -251,7 +251,7 @@ static int xhci_queue_req(struct urb *urb)
                 memset(req->setup, 0, 8);
         
         usb_ring->req_prod_pvt++;
-        RING_PUSH_REQUESTS(USBIF_RING, usb_ring);
+        RING_PUSH_REQUESTS(usb_ring);
 
        notify_via_evtchn(xhci->evtchn);
 
@@ -277,7 +277,7 @@ static inline usbif_request_t *xhci_queue_probe(usbif_vdev_t port)
               usbif->resp_prod, xhci->usb_resp_cons);
 #endif
         
-        if ( RING_FULL(USBIF_RING, usb_ring) )
+        if ( RING_FULL(usb_ring) )
         {
                 printk(KERN_WARNING
                        "xhci_queue_probe(): ring full, not queuing request\n");
@@ -285,7 +285,7 @@ static inline usbif_request_t *xhci_queue_probe(usbif_vdev_t port)
         }
 
         /* Stick something in the shared communications ring. */
-        req = RING_GET_REQUEST(USBIF_RING, usb_ring, usb_ring->req_prod_pvt);
+        req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
 
         memset(req, sizeof(*req), 0);
 
@@ -293,7 +293,7 @@ static inline usbif_request_t *xhci_queue_probe(usbif_vdev_t port)
         req->port            = port;
 
         usb_ring->req_prod_pvt++;
-        RING_PUSH_REQUESTS(USBIF_RING, usb_ring);
+        RING_PUSH_REQUESTS(usb_ring);
 
        notify_via_evtchn(xhci->evtchn);
 
@@ -313,7 +313,7 @@ static int xhci_port_reset(usbif_vdev_t port)
         xhci->awaiting_reset = 1;
         
         /* Stick something in the shared communications ring. */
-       req = RING_GET_REQUEST(USBIF_RING, usb_ring, usb_ring->req_prod_pvt);
+       req = RING_GET_REQUEST(usb_ring, usb_ring->req_prod_pvt);
 
         memset(req, sizeof(*req), 0);
 
@@ -321,7 +321,7 @@ static int xhci_port_reset(usbif_vdev_t port)
         req->port            = port;
         
         usb_ring->req_prod_pvt++;
-       RING_PUSH_REQUESTS(USBIF_RING, usb_ring);
+       RING_PUSH_REQUESTS(usb_ring);
 
        notify_via_evtchn(xhci->evtchn);
 
@@ -427,7 +427,7 @@ static void xhci_drain_ring(void)
         /* Take items off the comms ring, taking care not to overflow. */
         for ( i = usb_ring->rsp_cons; i != rp; i++ )
         {
-            resp = RING_GET_RESPONSE(USBIF_RING, usb_ring, i);
+            resp = RING_GET_RESPONSE(usb_ring, i);
             
             /* May need to deal with batching and with putting a ceiling on
                the number dispatched for performance and anti-dos reasons */
@@ -1497,8 +1497,8 @@ static void usbif_status_change(usbif_fe_interface_status_changed_t *status)
 
         /* Move from CLOSED to DISCONNECTED state. */
         sring = (usbif_sring_t *)__get_free_page(GFP_KERNEL);
-        SHARED_RING_INIT(USBIF_RING, sring);
-        FRONT_RING_INIT(USBIF_RING, &xhci->usb_ring, sring);
+        SHARED_RING_INIT(sring);
+        FRONT_RING_INIT(&xhci->usb_ring, sring);
         xhci->state  = USBIF_STATE_DISCONNECTED;
 
         /* Construct an interface-CONNECT message for the domain controller. */
index 2399a20d7a1542f023e7f7a0acb711f8d5df554d..a50eaa909bc53050e06668d019a1a4acc9042a23 100644 (file)
@@ -22,7 +22,6 @@
 #include <string.h>
 #include <unistd.h>
                                                                      
-
 #define __COMPILING_BLKTAP_LIB
 #include "blktaplib.h"
 
 
 #define BLKTAP_IOCTL_KICK 1
 
-// this is in the header now
-//DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t);
-
 void got_sig_bus();
 void got_sig_int();
 
-
 /* in kernel these are opposite, but we are a consumer now. */
 blkif_back_ring_t  fe_ring; /* slightly counterintuitive ;) */
 blkif_front_ring_t be_ring; 
 ctrl_back_ring_t   ctrl_ring;
 
-
-
 unsigned long mmap_vstart = 0;
 char *blktap_mem;
 int fd = 0;
@@ -60,8 +53,6 @@ int fd = 0;
 #define BLKTAP_MMAP_PAGES \
     ((BLKIF_MAX_SEGMENTS_PER_REQUEST + 1) * BLKIF_RING_SIZE)
 #define BLKTAP_MMAP_REGION_SIZE (BLKTAP_RING_PAGES + BLKTAP_MMAP_PAGES)
-
-
     
 int bad_count = 0;
 void bad(void)
@@ -208,7 +199,7 @@ inline int write_req_to_be_ring(blkif_request_t *req)
     blkif_request_t *req_d;
 
     //req_d = FRONT_RING_NEXT_EMPTY_REQUEST(&be_ring);
-    req_d = RING_GET_REQUEST(BLKIF_RING, &be_ring, be_ring.req_prod_pvt);
+    req_d = RING_GET_REQUEST(&be_ring, be_ring.req_prod_pvt);
     memcpy(req_d, req, sizeof(blkif_request_t));
     wmb();
     be_ring.req_prod_pvt++;
@@ -221,7 +212,7 @@ inline int write_rsp_to_fe_ring(blkif_response_t *rsp)
     blkif_response_t *rsp_d;
 
     //rsp_d = BACK_RING_NEXT_EMPTY_RESPONSE(&fe_ring);
-    rsp_d = RING_GET_RESPONSE(BLKIF_RING, &fe_ring, fe_ring.rsp_prod_pvt);
+    rsp_d = RING_GET_RESPONSE(&fe_ring, fe_ring.rsp_prod_pvt);
     memcpy(rsp_d, rsp, sizeof(blkif_response_t));
     wmb();
     fe_ring.rsp_prod_pvt++;
@@ -251,7 +242,7 @@ void blktap_inject_response(blkif_response_t *rsp)
 {
     apply_rsp_hooks(rsp);
     write_rsp_to_fe_ring(rsp);
-    RING_PUSH_RESPONSES(BLKIF_RING, &fe_ring);
+    RING_PUSH_RESPONSES(&fe_ring);
     ioctl(fd, BLKTAP_IOCTL_KICK_FE);
 }
 
@@ -371,13 +362,13 @@ int blktap_listen(void)
 
     /* assign the rings to the mapped memory */
     csring = (ctrl_sring_t *)blktap_mem;
-    BACK_RING_INIT(CTRL_RING, &ctrl_ring, csring);
+    BACK_RING_INIT(&ctrl_ring, csring);
     
     sring = (blkif_sring_t *)((unsigned long)blktap_mem + PAGE_SIZE);
-    FRONT_RING_INIT(BLKIF_RING, &be_ring, sring);
+    FRONT_RING_INIT(&be_ring, sring);
     
     sring = (blkif_sring_t *)((unsigned long)blktap_mem + (2 *PAGE_SIZE));
-    BACK_RING_INIT(BLKIF_RING, &fe_ring, sring);
+    BACK_RING_INIT(&fe_ring, sring);
 
     mmap_vstart = (unsigned long)blktap_mem + (BLKTAP_RING_PAGES << PAGE_SHIFT);
     
@@ -431,7 +422,7 @@ int blktap_listen(void)
             rmb();
             for (i = ctrl_ring.req_cons; i < rp; i++)
             {
-                msg = RING_GET_REQUEST(CTRL_RING, &ctrl_ring, i);
+                msg = RING_GET_REQUEST(&ctrl_ring, i);
 
                 ctrl_hook = ctrl_hook_chain;
                 while (ctrl_hook != NULL)
@@ -444,18 +435,18 @@ int blktap_listen(void)
             }
             /* Using this as a unidirectional ring. */
             ctrl_ring.req_cons = ctrl_ring.rsp_prod_pvt = i;
-            RING_PUSH_RESPONSES(CTRL_RING, &ctrl_ring);
+            RING_PUSH_RESPONSES(&ctrl_ring);
             
             /* empty the fe_ring */
             notify_fe = 0;
-            notify_be = RING_HAS_UNCONSUMED_REQUESTS(BLKIF_RING, &fe_ring);
+            notify_be = RING_HAS_UNCONSUMED_REQUESTS(&fe_ring);
             rp = fe_ring.sring->req_prod;
             rmb();
             for (i = fe_ring.req_cons; i != rp; i++)
             {
                 int done = 0; /* stop forwarding this request */
 
-                req = RING_GET_REQUEST(BLKIF_RING, &fe_ring, i);
+                req = RING_GET_REQUEST(&fe_ring, i);
 
                 DPRINTF("copying an fe request\n");
 
@@ -489,13 +480,13 @@ int blktap_listen(void)
             fe_ring.req_cons = i;
 
             /* empty the be_ring */
-            notify_fe |= RING_HAS_UNCONSUMED_RESPONSES(BLKIF_RING, &be_ring);
+            notify_fe |= RING_HAS_UNCONSUMED_RESPONSES(&be_ring);
             rp = be_ring.sring->rsp_prod;
             rmb();
             for (i = be_ring.rsp_cons; i != rp; i++)
             {
 
-                rsp = RING_GET_RESPONSE(BLKIF_RING, &be_ring, i);
+                rsp = RING_GET_RESPONSE(&be_ring, i);
 
                 DPRINTF("copying a be request\n");
 
@@ -508,13 +499,13 @@ int blktap_listen(void)
 
             if (notify_be) {
                 DPRINTF("notifying be\n");
-                RING_PUSH_REQUESTS(BLKIF_RING, &be_ring);
+                RING_PUSH_REQUESTS(&be_ring);
                 ioctl(fd, BLKTAP_IOCTL_KICK_BE);
             }
 
             if (notify_fe) {
                 DPRINTF("notifying fe\n");
-                RING_PUSH_RESPONSES(BLKIF_RING, &fe_ring);
+                RING_PUSH_RESPONSES(&fe_ring);
                 ioctl(fd, BLKTAP_IOCTL_KICK_FE);
             }
         }        
index 0896910cb4005c411cdbcc06be7455a8bc3fc465..b2fbad998aef90a22bae859653e562bc69e5194a 100644 (file)
@@ -57,14 +57,14 @@ int ctrl_chan_read_request(control_channel_t *cc, xcs_control_msg_t *dmsg)
     control_msg_t     *smsg;
     RING_IDX          c = cc->tx_ring.req_cons;
 
-    if ( !RING_HAS_UNCONSUMED_REQUESTS(CTRL_RING, &cc->tx_ring) )
+    if ( !RING_HAS_UNCONSUMED_REQUESTS(&cc->tx_ring) )
     {
         DPRINTF("no request to read\n");
         return -1;
     }
     
     rmb(); /* make sure we see the data associated with the request */
-    smsg = RING_GET_REQUEST(CTRL_RING, &cc->tx_ring, c);
+    smsg = RING_GET_REQUEST(&cc->tx_ring, c);
     memcpy(&dmsg->msg, smsg, sizeof(*smsg));
     if ( dmsg->msg.length > sizeof(dmsg->msg.msg) )
         dmsg->msg.length = sizeof(dmsg->msg.msg);
@@ -78,18 +78,18 @@ int ctrl_chan_write_request(control_channel_t *cc,
     control_msg_t *dmsg;
     RING_IDX       p = cc->rx_ring.req_prod_pvt;
     
-    if ( RING_FULL(CTRL_RING, &cc->rx_ring) )
+    if ( RING_FULL(&cc->rx_ring) )
     {
         DPRINTF("no space to write request");
         return -ENOSPC;
     }
 
-    dmsg = RING_GET_REQUEST(CTRL_RING, &cc->rx_ring, p);
+    dmsg = RING_GET_REQUEST(&cc->rx_ring, p);
     memcpy(dmsg, &smsg->msg, sizeof(*dmsg));
 
     wmb();
     cc->rx_ring.req_prod_pvt++;
-    RING_PUSH_REQUESTS(CTRL_RING, &cc->rx_ring);
+    RING_PUSH_REQUESTS(&cc->rx_ring);
     
     return 0;
 }
@@ -99,14 +99,14 @@ int ctrl_chan_read_response(control_channel_t *cc, xcs_control_msg_t *dmsg)
     control_msg_t     *smsg;
     RING_IDX          c = cc->rx_ring.rsp_cons;
     
-    if ( !RING_HAS_UNCONSUMED_RESPONSES(CTRL_RING, &cc->rx_ring) )
+    if ( !RING_HAS_UNCONSUMED_RESPONSES(&cc->rx_ring) )
     {
         DPRINTF("no response to read");
         return -1;
     }
 
     rmb(); /* make sure we see the data associated with the request */
-    smsg = RING_GET_RESPONSE(CTRL_RING, &cc->rx_ring, c);
+    smsg = RING_GET_RESPONSE(&cc->rx_ring, c);
     memcpy(&dmsg->msg, smsg, sizeof(*smsg));
     if ( dmsg->msg.length > sizeof(dmsg->msg.msg) )
         dmsg->msg.length = sizeof(dmsg->msg.msg);
@@ -128,29 +128,29 @@ int ctrl_chan_write_response(control_channel_t *cc,
         return -ENOSPC;
     }
 
-    dmsg = RING_GET_RESPONSE(CTRL_RING, &cc->tx_ring, p);
+    dmsg = RING_GET_RESPONSE(&cc->tx_ring, p);
     memcpy(dmsg, &smsg->msg, sizeof(*dmsg));
 
     wmb();
     cc->tx_ring.rsp_prod_pvt++;
-    RING_PUSH_RESPONSES(CTRL_RING, &cc->tx_ring);
+    RING_PUSH_RESPONSES(&cc->tx_ring);
     
     return 0;
 }
 
 int ctrl_chan_request_to_read(control_channel_t *cc)
 {
-    return (RING_HAS_UNCONSUMED_REQUESTS(CTRL_RING, &cc->tx_ring));
+    return (RING_HAS_UNCONSUMED_REQUESTS(&cc->tx_ring));
 }
 
 int ctrl_chan_space_to_write_request(control_channel_t *cc)
 {
-    return (!(RING_FULL(CTRL_RING, &cc->rx_ring)));
+    return (!(RING_FULL(&cc->rx_ring)));
 }
 
 int ctrl_chan_response_to_read(control_channel_t *cc)
 {
-    return (RING_HAS_UNCONSUMED_RESPONSES(CTRL_RING, &cc->rx_ring));
+    return (RING_HAS_UNCONSUMED_RESPONSES(&cc->rx_ring));
 }
 
 int ctrl_chan_space_to_write_response(control_channel_t *cc)
@@ -186,8 +186,8 @@ int ctrl_chan_connect(control_channel_t *cc)
     }
 
     /* Synchronise ring indexes. */
-    BACK_RING_ATTACH(CTRL_RING, &cc->tx_ring, &cc->interface->tx_ring);
-    FRONT_RING_ATTACH(CTRL_RING, &cc->rx_ring, &cc->interface->rx_ring);
+    BACK_RING_ATTACH(&cc->tx_ring, &cc->interface->tx_ring);
+    FRONT_RING_ATTACH(&cc->rx_ring, &cc->interface->rx_ring);
 
     cc->connected = 1;
 
index 35b1b78f84bd984692e5ecce11e75ab51ca12921..95b74b71b9833331fc3b71ecd0202ccb86f23022 100644 (file)
@@ -57,8 +57,7 @@ typedef struct {
  * Generate blkif ring structures and types.
  */
 
-#define BLKIF_RING RING_PARAMS(blkif_request_t, blkif_response_t, PAGE_SIZE)
-DEFINE_RING_TYPES(blkif, BLKIF_RING);
+DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t, PAGE_SIZE);
 
 /*
  * BLKIF_OP_PROBE:
index 491c309a0f13178b143bef4885ec817bf7c98be5..5742e58e401fa32936221ebb19db00e2b0882b3b 100644 (file)
@@ -45,9 +45,8 @@ typedef u32 CONTROL_RING_IDX;
  * CONTROL_RING_MEM is currently an 8-slot ring of ctrl_msg_t structs and
  * two 32-bit counters:  (64 * 8) + (2 * 4) = 520
  */
-#define CONTROL_RING_MEM 520 
-#define CTRL_RING RING_PARAMS(control_msg_t, control_msg_t, CONTROL_RING_MEM)
-DEFINE_RING_TYPES(ctrl, CTRL_RING);
+#define CONTROL_RING_MEM 520
+DEFINE_RING_TYPES(ctrl, control_msg_t, control_msg_t, CONTROL_RING_MEM);
 
 typedef struct {
     ctrl_sring_t tx_ring; /*    0: guest -> controller  */
index 8efeac4e98766de9b646bf89ab6796387606265b..eb0179249cb5baa7507d8c0b33e2420005f6d475 100644 (file)
@@ -8,49 +8,21 @@
 
 typedef unsigned int RING_IDX;
 
-/* This is horrible: it rounds a 32-bit unsigned constant down to the
- * nearest power of two, by finding the highest set bit. */
-#define __RD2PO2(_x) (((_x) & 0x80000000) ? 0x80000000 :                \
-                      ((_x) & 0x40000000) ? 0x40000000 :                \
-                      ((_x) & 0x20000000) ? 0x20000000 :                \
-                      ((_x) & 0x10000000) ? 0x10000000 :                \
-                      ((_x) & 0x08000000) ? 0x08000000 :                \
-                      ((_x) & 0x04000000) ? 0x04000000 :                \
-                      ((_x) & 0x02000000) ? 0x02000000 :                \
-                      ((_x) & 0x01000000) ? 0x01000000 :                \
-                      ((_x) & 0x00800000) ? 0x00800000 :                \
-                      ((_x) & 0x00400000) ? 0x00400000 :                \
-                      ((_x) & 0x00200000) ? 0x00200000 :                \
-                      ((_x) & 0x00100000) ? 0x00100000 :                \
-                      ((_x) & 0x00080000) ? 0x00080000 :                \
-                      ((_x) & 0x00040000) ? 0x00040000 :                \
-                      ((_x) & 0x00020000) ? 0x00020000 :                \
-                      ((_x) & 0x00010000) ? 0x00010000 :                \
-                      ((_x) & 0x00008000) ? 0x00008000 :                \
-                      ((_x) & 0x00004000) ? 0x00004000 :                \
-                      ((_x) & 0x00002000) ? 0x00002000 :                \
-                      ((_x) & 0x00001000) ? 0x00001000 :                \
-                      ((_x) & 0x00000800) ? 0x00000800 :                \
-                      ((_x) & 0x00000400) ? 0x00000400 :                \
-                      ((_x) & 0x00000200) ? 0x00000200 :                \
-                      ((_x) & 0x00000100) ? 0x00000100 :                \
-                      ((_x) & 0x00000080) ? 0x00000080 :                \
-                      ((_x) & 0x00000040) ? 0x00000040 :                \
-                      ((_x) & 0x00000020) ? 0x00000020 :                \
-                      ((_x) & 0x00000010) ? 0x00000010 :                \
-                      ((_x) & 0x00000008) ? 0x00000008 :                \
-                      ((_x) & 0x00000004) ? 0x00000004 :                \
-                      ((_x) & 0x00000002) ? 0x00000002 :                \
-                      ((_x) & 0x00000001) ? 0x00000001 : 0x00000000)
-
-/* Given a shared ring, tell me how many entries there are in it.  The
- * rule is: a ring contains as many entries as will fit, rounded down to
- * the nearest power of two (so we can mask with (size-1) to loop
- * around) */
-#define __SRING_SIZE(__params, __esize)                                 \
-    __RD2PO2((sizeof((__params)->size) - (2 * sizeof(RING_IDX))) / (__esize))
-#define SRING_SIZE(__params, __sringp)                                  \
-    __SRING_SIZE(__params, sizeof (__sringp)->ring[0])
+/* Round a 32-bit unsigned constant down to the nearest power of two. */
+#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
+#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
+#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
+
+/*
+ * Calculate size of a shared ring, given the total available space for the
+ * ring and indexes (_sz), and the name tag of the request/response structure.
+ * A ring contains as many entries as will fit, rounded down to the nearest
+ * power of two (so we can mask with (size-1) to loop around).
+ */
+#define __RING_SIZE(_name, _sz)                                         \
+    (__RD32(((_sz) - 2*sizeof(RING_IDX)) / sizeof(union _name##_sring_entry)))
 
 /*
  *  Macros to make the correct C datatypes for a new kind of ring.
@@ -62,8 +34,7 @@ typedef unsigned int RING_IDX;
  *
  *  In a header where you want the ring datatype declared, you then do:
  *
- *   #define MY_RING RING_PARAMS(request_t, response_t, PAGE_SIZE)
- *   DEFINE_RING_TYPES(mytag, MY_RING);
+ *     DEFINE_RING_TYPES(mytag, request_t, response_t, PAGE_SIZE);
  *
  *  These expand out to give you a set of types, as you can see below.
  *  The most important of these are:
@@ -72,51 +43,29 @@ typedef unsigned int RING_IDX;
  *     mytag_front_ring_t - The 'front' half of the ring.
  *     mytag_back_ring_t  - The 'back' half of the ring.
  *
- *  Use the RING_PARAMS define (MY_RING above) as a first parameter on all
- *  the ring functions.  To initialize a ring in your code, on the front 
- *  half, you do a:
+ *  To initialize a ring in your code, on the front half, you do:
  *
  *      mytag_front_ring_t front_ring;
  *
- *      SHARED_RING_INIT(MY_RING, (mytag_sring_t *)shared_page)
- *      FRONT_RING_INIT(MY_RING, &front_ring, (mytag_sring_t *)shared_page)
+ *      SHARED_RING_INIT((mytag_sring_t *)shared_page);
+ *      FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page);
  *
  *  Initializing the back follows similarly...
  */
          
-/*  NB: RING SIZING. (a note to ease future debugging...)
- *
- *  Passing size information into the ring macros is made difficult by 
- *  the lack of a reasonable constant declaration in C.  To get around this,
- *  the RING_PARAMS define places the requested size of the ring as the 
- *  static size of the 'size' array in the anonymous RING_PARAMS struct.
- *  While this struct is never actually instantiated, __SRING_SIZE is 
- *  able to use sizeof() to get at the constant size.
- */
-
-#define RING_PARAMS(__req_t, __rsp_t, __size)                           \
-((struct {                                                              \
-    char size[__size];                                                  \
-    __req_t req;                                                        \
-    __rsp_t rsp;                                                        \
-                                                                        \
-} *) 0)
-
-
-#define DEFINE_RING_TYPES(__name, __params)                             \
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t, __size)             \
                                                                         \
 /* Shared ring entry */                                                 \
 union __name##_sring_entry {                                            \
-    typeof ((__params)->req) req;                                       \
-    typeof ((__params)->rsp) rsp;                                       \
+    __req_t req;                                                        \
+    __rsp_t rsp;                                                        \
 } PACKED;                                                               \
                                                                         \
 /* Shared ring page */                                                  \
 struct __name##_sring {                                                 \
     RING_IDX req_prod;                                                  \
     RING_IDX rsp_prod;                                                  \
-    union __name##_sring_entry                                          \
-        ring[__SRING_SIZE(__params, sizeof (union __name##_sring_entry))];        \
+    union __name##_sring_entry ring[__RING_SIZE(__name, __size)];       \
 } PACKED;                                                               \
                                                                         \
 /* "Front" end's private variables */                                   \
@@ -149,106 +98,97 @@ typedef struct __name##_back_ring __name##_back_ring_t;
  * 
  *   N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.  
  *   This is OK in 1-for-1 request-response situations where the 
- *   requestor (front end) never has more than SRING_SIZE()-1
+ *   requestor (front end) never has more than RING_SIZE()-1
  *   outstanding requests.
  */
 
 
 /* Initialising empty rings */
-#define SHARED_RING_INIT(_p, _s) do {                                   \
+#define SHARED_RING_INIT(_s) do {                                       \
     (_s)->req_prod = 0;                                                 \
     (_s)->rsp_prod = 0;                                                 \
 } while(0)
 
-#define FRONT_RING_INIT(_p, _r, _s) do {                                \
+#define FRONT_RING_INIT(_r, _s) do {                                    \
     (_r)->req_prod_pvt = 0;                                             \
     (_r)->rsp_cons = 0;                                                 \
     (_r)->sring = (_s);                                                 \
 } while (0)
 
-#define BACK_RING_INIT(_p, _r, _s) do {                                 \
+#define BACK_RING_INIT(_r, _s) do {                                     \
     (_r)->rsp_prod_pvt = 0;                                             \
     (_r)->req_cons = 0;                                                 \
     (_r)->sring = (_s);                                                 \
 } while (0)
 
 /* Initialize to existing shared indexes -- for recovery */
-#define FRONT_RING_ATTACH(_p, _r, _s) do {                              \
+#define FRONT_RING_ATTACH(_r, _s) do {                                  \
     (_r)->sring = (_s);                                                 \
     (_r)->req_prod_pvt = (_s)->req_prod;                                \
     (_r)->rsp_cons = (_s)->rsp_prod;                                    \
 } while (0)
 
-#define BACK_RING_ATTACH(_p, _r, _s) do {                               \
+#define BACK_RING_ATTACH(_r, _s) do {                                   \
     (_r)->sring = (_s);                                                 \
     (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
     (_r)->req_cons = (_s)->req_prod;                                    \
 } while (0)
 
-
-/* How to mask off a number for use as an offset into a ring 
- * N.B. This evalutes its second argument once but its first often */
-#define __SHARED_RING_MASK(_p, _s, _i)                                  \
-    ((_i) & (SRING_SIZE((_p), (_s)) - 1))
-
 /* How big is this ring? */
-#define RING_SIZE(_p, _r) SRING_SIZE((_p), (_r)->sring)
+#define RING_SIZE(_r)                                                   \
+    (sizeof((_r)->sring->ring)/sizeof((_r)->sring->ring[0]))
 
 /* How many empty slots are on a ring? */
-#define RING_PENDING_REQUESTS(_p, _r)                                   \
+#define RING_PENDING_REQUESTS(_r)                                       \
    ( ((_r)->req_prod_pvt - (_r)->rsp_cons) )
    
 /* Test if there is an empty slot available on the front ring. 
  * (This is only meaningful from the front. )
  */
-#define RING_FULL(_p, _r)                                               \
-    (((_r)->req_prod_pvt - (_r)->rsp_cons) == SRING_SIZE((_p), (_r)->sring))
+#define RING_FULL(_r)                                                   \
+    (((_r)->req_prod_pvt - (_r)->rsp_cons) == RING_SIZE(_r))
 
 /* Test if there are outstanding messages to be processed on a ring. */
-#define RING_HAS_UNCONSUMED_RESPONSES(_p, _r)                           \
+#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
    ( (_r)->rsp_cons != (_r)->sring->rsp_prod )
    
-#define RING_HAS_UNCONSUMED_REQUESTS(_p, _r)                            \
+#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
    ( ((_r)->req_cons != (_r)->sring->req_prod ) &&                      \
      (((_r)->req_cons - (_r)->rsp_prod_pvt) !=                          \
-      SRING_SIZE((_p), (_r)->sring)) )
+      RING_SIZE(_r)) )
       
 /* Test if there are messages waiting to be pushed. */
-#define RING_HAS_UNPUSHED_REQUESTS(_p, _r)                              \
+#define RING_HAS_UNPUSHED_REQUESTS(_r)                                  \
    ( (_r)->req_prod_pvt != (_r)->sring->req_prod )
    
-#define RING_HAS_UNPUSHED_RESPONSES(_p, _r)                             \
+#define RING_HAS_UNPUSHED_RESPONSES(_r)                                 \
    ( (_r)->rsp_prod_pvt != (_r)->sring->rsp_prod )
-   
 
 /* Copy the private producer pointer into the shared ring so the other end 
  * can see the updates we've made. */
-#define RING_PUSH_REQUESTS(_p, _r) do {                                 \
+#define RING_PUSH_REQUESTS(_r) do {                                     \
     wmb();                                                              \
     (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
 } while (0)
 
-#define RING_PUSH_RESPONSES(_p, _r) do {                                \
+#define RING_PUSH_RESPONSES(_r) do {                                    \
     wmb();                                                              \
     (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
 } while (0)
 
-/* Direct access to individual ring elements, by index.  
- */
-#define RING_GET_REQUEST(_p, _r, _idx)                                  \
+/* Direct access to individual ring elements, by index. */
+#define RING_GET_REQUEST(_r, _idx)                                      \
  (&((_r)->sring->ring[                                                  \
-     __SHARED_RING_MASK((_p), (_r)->sring, (_idx))                      \
+     ((_idx) & (RING_SIZE(_r) - 1))                                     \
      ].req))
 
-#define RING_GET_RESPONSE(_p, _r, _idx)                                 \
+#define RING_GET_RESPONSE(_r, _idx)                                     \
  (&((_r)->sring->ring[                                                  \
-     __SHARED_RING_MASK((_p), (_r)->sring, (_idx))                      \
+     ((_idx) & (RING_SIZE(_r) - 1))                                     \
      ].rsp))   
     
-/* Loop termination condition: Would the specified index overflow the 
- * ring? 
- */
-#define RING_REQUEST_CONS_OVERFLOW(_p, _r, _cons)                      \
-    (((_cons) - (_r)->rsp_prod_pvt) >= SRING_SIZE((_p), (_r)->sring))
+/* Loop termination condition: Would the specified index overflow the ring? */
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
+    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
 
 #endif /* __XEN_PUBLIC_IO_RING_H__ */
index 6056d54f4074da902139c1e2d913dee8ecc2ebd2..7fb554607a7c688a00daa2856a7450b445c3b119 100644 (file)
@@ -54,8 +54,7 @@ typedef struct {
 #define USBIF_RSP_ERROR  -1 /* non-specific 'error' */
 #define USBIF_RSP_OKAY    0 /* non-specific 'okay'  */
 
-#define USBIF_RING RING_PARAMS(usbif_request_t, usbif_response_t, PAGE_SIZE)
-DEFINE_RING_TYPES(usbif, USBIF_RING);
+DEFINE_RING_TYPES(usbif, usbif_request_t, usbif_response_t, PAGE_SIZE);
 
 typedef struct {
     unsigned long length; /* IN = expected, OUT = actual */